author     jkim <jkim@FreeBSD.org>	2017-12-01 09:56:02 +0800
committer  jkim <jkim@FreeBSD.org>	2017-12-01 09:56:02 +0800
commit     80161595fc3877a2d2772e4f7a8b73b8330a108d
tree       4d47c6319b42ca7cba1162eacea533a76354c4da /java
parent     d53432c118396e74e6dccd965f75c700558dda96
Add support for the "-XX:+UseLargePages" option.
Excerpted from the original submission:

"FreeBSD on several architectures automatically utilizes superpages. So, for
the JVM's JIT-compiled code cache and data heap, much of those regions are
already using superpages. However, the -XX:+UseLargePages option still serves
a useful, if secondary, purpose on FreeBSD. Essentially, it informs the JVM's
platform-independent layer what the supported page sizes are. So, when memory
is mmap(2)ed and munmap(2)ed within the code cache and data heap, the
addresses and sizes will be superpage-aligned and a multiple of the superpage
size, respectively.

Currently, without this patch, the code cache, for example, typically starts
at an unaligned address, so the initial portion of the code cache can never be
a superpage. Similarly, unaligned munmap(2)s on the data heap force the kernel
to demote superpages to 4KB pages, and this patch eliminates those demotions.

This patch both introduces the code needed on FreeBSD to implement
-XX:+UseLargePages and deletes code that was copied from Linux that is, to the
best of my knowledge, useless on any version of BSD, i.e., UseHugeTLBFS and
UseSHM. The additions are in part based on the Solaris version of
os_solaris.cpp, e.g., the sorting of the page sizes array."

Note I did minor style cleanup and regenerated the patches.

Submitted by:	alc
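For reference, the core of the change is small: the patched anon_mmap() turns a
non-zero alignment_hint into a FreeBSD MAP_ALIGNED() mmap(2) flag, and the new
superpage_sanity_check() discovers the supported page sizes with
getpagesizes(2). The standalone C sketch below mirrors that usage outside the
JVM; it assumes a FreeBSD system with superpages available (e.g. amd64 with
4 KB and 2 MB pages) and is not part of the patch — the program structure and
messages are invented for illustration.

	/*
	 * Standalone sketch, not JDK code: reserve a superpage-aligned
	 * region the way the patched anon_mmap() in os_bsd.cpp does,
	 * after asking the kernel for its supported page sizes.
	 */
	#include <sys/mman.h>
	#include <strings.h>	/* ffs() */
	#include <stdio.h>

	int
	main(void)
	{
		size_t sizes[8];
		/* Which page sizes does the kernel support? */
		int n = getpagesizes(sizes, 8);
		if (n <= 1) {
			fprintf(stderr, "no superpage sizes reported\n");
			return (1);
		}

		/* Pick the largest supported page size (the superpage size). */
		size_t super = 0;
		for (int i = 0; i < n; i++)
			if (sizes[i] > super)
				super = sizes[i];

		/*
		 * MAP_ALIGNED(k) requests a 2^k-byte aligned mapping; for a
		 * power-of-two size, ffs(super) - 1 == log2(super), which is
		 * how the patched anon_mmap() encodes alignment_hint.
		 */
		void *addr = mmap(NULL, super, PROT_NONE,
		    MAP_PRIVATE | MAP_ANON | MAP_ALIGNED(ffs((int)super) - 1),
		    -1, 0);
		if (addr == MAP_FAILED) {
			perror("mmap");
			return (1);
		}
		printf("reserved %zu bytes at %p (superpage-aligned)\n",
		    super, addr);
		munmap(addr, super);
		return (0);
	}

In the patched os_bsd.cpp the same pattern is driven by the JVM itself:
os::pd_reserve_memory() forwards its alignment_hint to anon_mmap(), so code
cache and data heap reservations start on superpage boundaries rather than at
arbitrary small-page-aligned addresses, as described in the commit message.
A rough way to observe the effect on a running JVM (not part of this commit)
is procstat -v <pid>, where map entries backed by superpages carry the S flag.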
Diffstat (limited to 'java')
-rw-r--r--  java/openjdk8/Makefile                                                 |   1
-rw-r--r--  java/openjdk8/files/patch-hotspot_src_os_bsd_vm_globals__bsd.hpp       |  27
-rw-r--r--  java/openjdk8/files/patch-hotspot_src_os_bsd_vm_os__bsd.cpp            | 195
-rw-r--r--  java/openjdk8/files/patch-hotspot_src_os_bsd_vm_os__bsd.hpp            |  11
-rw-r--r--  java/openjdk8/files/patch-hotspot_src_share_vm_runtime_arguments.cpp   |  12
5 files changed, 241 insertions, 5 deletions
diff --git a/java/openjdk8/Makefile b/java/openjdk8/Makefile
index e4d143acb27f..12a1b0e8a96a 100644
--- a/java/openjdk8/Makefile
+++ b/java/openjdk8/Makefile
@@ -2,6 +2,7 @@
PORTNAME= openjdk
PORTVERSION= ${JDK_MAJOR_VERSION}.${JDK_UPDATE_VERSION}.${JDK_BUILD_NUMBER:S/^0//}
+PORTREVISION= 1
CATEGORIES= java devel
MASTER_SITES= http://download.java.net/openjdk/jdk${JDK_MAJOR_VERSION}/promoted/b${DIST_BUILD_NUMBER}/:jdk \
https://adopt-openjdk.ci.cloudbees.com/job/jtreg/${JTREG_JENKINS_BUILD}/artifact/:jtreg \
diff --git a/java/openjdk8/files/patch-hotspot_src_os_bsd_vm_globals__bsd.hpp b/java/openjdk8/files/patch-hotspot_src_os_bsd_vm_globals__bsd.hpp
new file mode 100644
index 000000000000..5449e002c8f2
--- /dev/null
+++ b/java/openjdk8/files/patch-hotspot_src_os_bsd_vm_globals__bsd.hpp
@@ -0,0 +1,27 @@
+--- hotspot/src/os/bsd/vm/globals_bsd.hpp.orig 2014-03-04 02:52:13 UTC
++++ hotspot/src/os/bsd/vm/globals_bsd.hpp
+@@ -35,19 +35,17 @@
+ product(bool, UseBsdPosixThreadCPUClocks, true, \
+ "enable fast Bsd Posix clocks where available") \
+ /* NB: The default value of UseBsdPosixThreadCPUClocks may be \
+- overridden in Arguments::parse_each_vm_init_arg. */ \
+- \
+- product(bool, UseHugeTLBFS, false, \
+- "Use MAP_HUGETLB for large pages") \
+- \
+- product(bool, UseSHM, false, \
+- "Use SYSV shared memory for large pages")
++ overridden in Arguments::parse_each_vm_init_arg. */
+
+ //
+ // Defines Bsd-specific default values. The flags are available on all
+ // platforms, but they may have different default values on other platforms.
+ //
++#ifdef __FreeBSD__
++define_pd_global(bool, UseLargePages, true);
++#else
+ define_pd_global(bool, UseLargePages, false);
++#endif
+ define_pd_global(bool, UseLargePagesIndividualAllocation, false);
+ define_pd_global(bool, UseOSErrorReporting, false);
+ define_pd_global(bool, UseThreadPriorities, true) ;
diff --git a/java/openjdk8/files/patch-hotspot_src_os_bsd_vm_os__bsd.cpp b/java/openjdk8/files/patch-hotspot_src_os_bsd_vm_os__bsd.cpp
index 54602f2b4dca..7991f1265b82 100644
--- a/java/openjdk8/files/patch-hotspot_src_os_bsd_vm_os__bsd.cpp
+++ b/java/openjdk8/files/patch-hotspot_src_os_bsd_vm_os__bsd.cpp
@@ -1,6 +1,6 @@
---- hotspot/src/os/bsd/vm/os_bsd.cpp.orig 2014-03-04 02:52:13 UTC
+--- hotspot/src/os/bsd/vm/os_bsd.cpp.orig 2017-12-01 00:32:02 UTC
+++ hotspot/src/os/bsd/vm/os_bsd.cpp
-@@ -1452,6 +1452,10 @@ void * os::dll_load(const char *filename, char *ebuf,
+@@ -1588,6 +1588,10 @@ void * os::dll_load(const char *filename, char *ebuf,
#define EM_X86_64 62 /* AMD x86-64 */
#endif
@@ -11,7 +11,7 @@
static const arch_t arch_array[]={
{EM_386, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
{EM_486, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
-@@ -1468,7 +1472,8 @@ void * os::dll_load(const char *filename, char *ebuf,
+@@ -1604,7 +1608,8 @@ void * os::dll_load(const char *filename, char *ebuf,
{EM_MIPS_RS3_LE, EM_MIPS_RS3_LE, ELFCLASS32, ELFDATA2LSB, (char*)"MIPSel"},
{EM_MIPS, EM_MIPS, ELFCLASS32, ELFDATA2MSB, (char*)"MIPS"},
{EM_PARISC, EM_PARISC, ELFCLASS32, ELFDATA2MSB, (char*)"PARISC"},
@@ -21,7 +21,7 @@
};
#if (defined IA32)
-@@ -1485,6 +1490,8 @@ void * os::dll_load(const char *filename, char *ebuf,
+@@ -1621,6 +1626,8 @@ void * os::dll_load(const char *filename, char *ebuf,
static Elf32_Half running_arch_code=EM_PPC64;
#elif (defined __powerpc__)
static Elf32_Half running_arch_code=EM_PPC;
@@ -30,7 +30,7 @@
#elif (defined ARM)
static Elf32_Half running_arch_code=EM_ARM;
#elif (defined S390)
-@@ -1501,7 +1508,7 @@ void * os::dll_load(const char *filename, char *ebuf,
+@@ -1637,7 +1644,7 @@ void * os::dll_load(const char *filename, char *ebuf,
static Elf32_Half running_arch_code=EM_68K;
#else
#error Method os::dll_load requires that one of following is defined:\
@@ -39,3 +39,188 @@
#endif
// Identify compatability class for VM's architecture and library's architecture
+@@ -2395,14 +2402,18 @@ static address _highest_vm_reserved_address = NULL;
+ // 'requested_addr' is only treated as a hint, the return value may or
+ // may not start from the requested address. Unlike Bsd mmap(), this
+ // function returns NULL to indicate failure.
+-static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
++static char* anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
+ char * addr;
+ int flags;
+
++ assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");
++
+ flags = MAP_PRIVATE | MAP_ANONYMOUS;
+ if (fixed) {
+ assert((uintptr_t)requested_addr % os::Bsd::page_size() == 0, "unaligned address");
+ flags |= MAP_FIXED;
++ } else if (alignment_hint > 0) {
++ flags |= MAP_ALIGNED(ffs(alignment_hint) - 1);
+ }
+
+ // Map reserved/uncommitted pages PROT_NONE so we fail early if we
+@@ -2434,7 +2445,7 @@ static int anon_munmap(char * addr, size_t size) {
+
+ char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
+ size_t alignment_hint) {
+- return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
++ return anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));
+ }
+
+ bool os::pd_release_memory(char* addr, size_t size) {
+@@ -2480,19 +2491,93 @@ bool os::unguard_memory(char* addr, size_t size) {
+ return bsd_mprotect(addr, size, PROT_READ|PROT_WRITE);
+ }
+
+-bool os::Bsd::hugetlbfs_sanity_check(bool warn, size_t page_size) {
+- return false;
+-}
+-
+ // Large page support
+
+ static size_t _large_page_size = 0;
+
+ void os::large_page_init() {
++ if (UseLargePages) {
++ // print a warning if any large page related flag is specified on command line
++ bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
++ !FLAG_IS_DEFAULT(LargePageSizeInBytes);
++
++ UseLargePages = Bsd::superpage_sanity_check(warn_on_failure, &_large_page_size);
++ }
+ }
+
++// Insertion sort for small arrays (descending order).
++static void insertion_sort_descending(size_t* array, int len) {
++ for (int i = 0; i < len; i++) {
++ size_t val = array[i];
++ for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
++ size_t tmp = array[key];
++ array[key] = array[key - 1];
++ array[key - 1] = tmp;
++ }
++ }
++}
+
++bool os::Bsd::superpage_sanity_check(bool warn, size_t* page_size) {
++#ifdef __FreeBSD__
++ const unsigned int usable_count = VM_Version::page_size_count();
++ if (usable_count == 1) {
++ return false;
++ }
++
++ // Fill the array of page sizes.
++ int n = ::getpagesizes(_page_sizes, page_sizes_max);
++ assert(n > 0, "FreeBSD bug?");
++
++ if (n == page_sizes_max) {
++ // Add a sentinel value (necessary only if the array was completely filled
++ // since it is static (zeroed at initialization)).
++ _page_sizes[--n] = 0;
++ DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
++ }
++ assert(_page_sizes[n] == 0, "missing sentinel");
++ trace_page_sizes("available page sizes", _page_sizes, n);
++
++ if (n == 1) return false; // Only one page size available.
++
++ // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
++ // select up to usable_count elements. First sort the array, find the first
++ // acceptable value, then copy the usable sizes to the top of the array and
++ // trim the rest. Make sure to include the default page size :-).
++ //
++ // A better policy could get rid of the 4M limit by taking the sizes of the
++ // important VM memory regions (java heap and possibly the code cache) into
++ // account.
++ insertion_sort_descending(_page_sizes, n);
++ const size_t size_limit =
++ FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
++ int beg;
++ for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
++ const int end = MIN2((int)usable_count, n) - 1;
++ for (int cur = 0; cur < end; ++cur, ++beg) {
++ _page_sizes[cur] = _page_sizes[beg];
++ }
++ _page_sizes[end] = vm_page_size();
++ _page_sizes[end + 1] = 0;
++
++ if (_page_sizes[end] > _page_sizes[end - 1]) {
++ // Default page size is not the smallest; sort again.
++ insertion_sort_descending(_page_sizes, end + 1);
++ }
++ *page_size = _page_sizes[0];
++
++ trace_page_sizes("usable page sizes", _page_sizes, end + 1);
++ return true;
++#else
++ return false;
++#endif
++}
++
++
+ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
++#ifdef __FreeBSD__
++ fatal("os::reserve_memory_special should not be called on FreeBSD.");
++ return NULL;
++#else
+ fatal("This code is not used or maintained.");
+
+ // "exec" is passed in but not used. Creating the shared image for
+@@ -2552,9 +2637,14 @@ char* os::reserve_memory_special(size_t bytes, size_t
+ MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
+
+ return addr;
++#endif
+ }
+
+ bool os::release_memory_special(char* base, size_t bytes) {
++#ifdef __FreeBSD__
++ fatal("os::release_memory_special should not be called on FreeBSD.");
++ return false;
++#else
+ if (MemTracker::tracking_level() > NMT_minimal) {
+ Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
+ // detaching the SHM segment will also delete it, see reserve_memory_special()
+@@ -2568,21 +2658,28 @@ bool os::release_memory_special(char* base, size_t byt
+ } else {
+ return shmdt(base) == 0;
+ }
++#endif
+ }
+
+ size_t os::large_page_size() {
+ return _large_page_size;
+ }
+
+-// HugeTLBFS allows application to commit large page memory on demand;
+-// with SysV SHM the entire memory region must be allocated as shared
+-// memory.
++// FreeBSD allows application to commit large page memory on demand.
+ bool os::can_commit_large_page_memory() {
+- return UseHugeTLBFS;
++#ifdef __FreeBSD__
++ return true;
++#else
++ return false;
++#endif
+ }
+
+ bool os::can_execute_large_page_memory() {
+- return UseHugeTLBFS;
++#ifdef __FreeBSD__
++ return true;
++#else
++ return false;
++#endif
+ }
+
+ // Reserve memory at an arbitrary address, only if that area is
+@@ -2614,7 +2711,7 @@ char* os::pd_attempt_reserve_memory_at(size_t bytes, c
+
+ // Bsd mmap allows caller to pass an address as hint; give it a try first,
+ // if kernel honors the hint then we can return immediately.
+- char * addr = anon_mmap(requested_addr, bytes, false);
++ char * addr = anon_mmap(requested_addr, bytes, 0, false);
+ if (addr == requested_addr) {
+ return requested_addr;
+ }
diff --git a/java/openjdk8/files/patch-hotspot_src_os_bsd_vm_os__bsd.hpp b/java/openjdk8/files/patch-hotspot_src_os_bsd_vm_os__bsd.hpp
new file mode 100644
index 000000000000..1f513305cc5b
--- /dev/null
+++ b/java/openjdk8/files/patch-hotspot_src_os_bsd_vm_os__bsd.hpp
@@ -0,0 +1,11 @@
+--- hotspot/src/os/bsd/vm/os_bsd.hpp.orig 2017-12-01 00:32:02 UTC
++++ hotspot/src/os/bsd/vm/os_bsd.hpp
+@@ -87,7 +87,7 @@ class Bsd {
+ static void rebuild_cpu_to_node_map();
+ static GrowableArray<int>* cpu_to_node() { return _cpu_to_node; }
+
+- static bool hugetlbfs_sanity_check(bool warn, size_t page_size);
++ static bool superpage_sanity_check(bool warn, size_t *page_size);
+
+ public:
+
diff --git a/java/openjdk8/files/patch-hotspot_src_share_vm_runtime_arguments.cpp b/java/openjdk8/files/patch-hotspot_src_share_vm_runtime_arguments.cpp
new file mode 100644
index 000000000000..a437f8fccb42
--- /dev/null
+++ b/java/openjdk8/files/patch-hotspot_src_share_vm_runtime_arguments.cpp
@@ -0,0 +1,12 @@
+--- hotspot/src/share/vm/runtime/arguments.cpp.orig 2017-12-01 00:32:02.284351000 +0000
++++ hotspot/src/share/vm/runtime/arguments.cpp 2017-12-01 00:38:48.362549000 +0000
+@@ -3974,7 +3974,8 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
+ hotspotrc, hotspotrc);
+ }
+
+-#ifdef _ALLBSD_SOURCE // UseLargePages is not yet supported on BSD.
++#if defined(_ALLBSD_SOURCE) && !defined(__FreeBSD__)
++ // UseLargePages is not yet supported on BSD.
+ UNSUPPORTED_OPTION(UseLargePages, "-XX:+UseLargePages");
+ #endif
+