Merge tag 'trace-v4.17-rc5-vsprintf' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull memory barrier fix from Steven Rostedt:
 "The memory barrier usage in updating the random ptr hash for %p in
  vsprintf is incorrect.

  Instead of adding the read memory barrier into vsprintf() which will
  cause a slight degradation to a commonly used function in the kernel
  just to solve a very unlikely race condition that can only happen at
  boot up, change the code from using a variable branch to a
  static_branch.

  Not only does this solve the race condition, it actually will improve
  the performance of vsprintf() by removing the conditional branch that
  is only needed at boot"

* tag 'trace-v4.17-rc5-vsprintf' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  vsprintf: Replace memory barrier with static_key for random_ptr_key update
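
For readers unfamiliar with the mechanism, here is a minimal, self-contained sketch of the static-key pattern the patch adopts: a key defined true at boot guards the fallback path, and a one-time static_branch_disable(), run from preemptible (workqueue) context, patches the branch out of the hot path. This is an illustrative module sketch under assumed names (not_ready_key, enable_workfn, demo_*), not code from the patch itself.

/* Illustrative sketch only -- names below are hypothetical, not from the patch. */
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

/* Key starts "true": the not-yet-initialized fallback path is taken. */
static DEFINE_STATIC_KEY_TRUE(not_ready_key);

static void enable_workfn(struct work_struct *work)
{
	/* static_branch_disable() may sleep, so it runs from a workqueue. */
	static_branch_disable(&not_ready_key);
}
static DECLARE_WORK(enable_work, enable_workfn);

static const char *describe(void)
{
	if (static_branch_unlikely(&not_ready_key))
		return "fallback (key still set)";
	return "fast path (branch patched out)";
}

static int __init demo_init(void)
{
	pr_info("static-key demo before: %s\n", describe());
	/* Safe even from atomic context: the flip itself happens in the work item. */
	queue_work(system_unbound_wq, &enable_work);
	return 0;
}

static void __exit demo_exit(void)
{
	flush_work(&enable_work);
	pr_info("static-key demo after: %s\n", describe());
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Minimal static_key usage sketch");

The point of the pattern, as the pull message notes, is that once the key has been disabled the check in the hot path costs a patched no-op rather than a load plus conditional branch.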
Linus Torvalds 2018-05-16 11:02:54 -07:00
commit 9d38cd06c3
1 changed file with 15 additions and 11 deletions

lib/vsprintf.c

@@ -1669,19 +1669,22 @@ char *pointer_string(char *buf, char *end, const void *ptr,
 	return number(buf, end, (unsigned long int)ptr, spec);
 }
 
-static bool have_filled_random_ptr_key __read_mostly;
+static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
 static siphash_key_t ptr_key __read_mostly;
 
+static void enable_ptr_key_workfn(struct work_struct *work)
+{
+	get_random_bytes(&ptr_key, sizeof(ptr_key));
+	/* Needs to run from preemptible context */
+	static_branch_disable(&not_filled_random_ptr_key);
+}
+
+static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
+
 static void fill_random_ptr_key(struct random_ready_callback *unused)
 {
-	get_random_bytes(&ptr_key, sizeof(ptr_key));
-	/*
-	 * have_filled_random_ptr_key==true is dependent on get_random_bytes().
-	 * ptr_to_id() needs to see have_filled_random_ptr_key==true
-	 * after get_random_bytes() returns.
-	 */
-	smp_mb();
-	WRITE_ONCE(have_filled_random_ptr_key, true);
+	/* This may be in an interrupt handler. */
+	queue_work(system_unbound_wq, &enable_ptr_key_work);
 }
 
 static struct random_ready_callback random_ready = {
@@ -1695,7 +1698,8 @@ static int __init initialize_ptr_random(void)
 	if (!ret) {
 		return 0;
 	} else if (ret == -EALREADY) {
-		fill_random_ptr_key(&random_ready);
+		/* This is in preemptible context */
+		enable_ptr_key_workfn(&enable_ptr_key_work);
 		return 0;
 	}
 
@@ -1709,7 +1713,7 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
 	unsigned long hashval;
 	const int default_width = 2 * sizeof(ptr);
 
-	if (unlikely(!have_filled_random_ptr_key)) {
+	if (static_branch_unlikely(&not_filled_random_ptr_key)) {
 		spec.field_width = default_width;
 		/* string length must be less than default_width */
 		return string(buf, end, "(ptrval)", spec);
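
For context on why the pull message calls the old barrier usage incorrect: the removed smp_mb() ordered only the writer (fill ptr_key, then set the flag), while ptr_to_id() had no paired read-side barrier, so in the unlikely boot-time window a reader could see the flag as true yet still load a not-yet-filled ptr_key; curing that directly would have meant an smp_rmb() in every vsprintf() call. The snippet below is a userspace C11 analogue of that publication pattern, not kernel code: memory_order_acquire stands in for the read-side ordering the reader needs, and every name in it is made up for illustration.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static unsigned long key;        /* plain data, published once (cf. ptr_key)  */
static atomic_bool key_ready;    /* publication flag (cf. the old bool flag)  */

static void *writer(void *arg)
{
	key = 0x5eed;					/* stands in for get_random_bytes() */
	atomic_store_explicit(&key_ready, true,
			      memory_order_release);	/* writer-side ordering */
	return NULL;
}

static void *reader(void *arg)
{
	/* Acquire pairs with the writer's release: if the flag is seen as true,
	 * the key is guaranteed to be seen as filled as well. */
	if (atomic_load_explicit(&key_ready, memory_order_acquire))
		printf("hash pointers with key %#lx\n", key);
	else
		printf("(ptrval)\n");	/* fallback, as ptr_to_id() prints at boot */
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

With the static key, neither side of the kernel code needs an explicit barrier in the hot path: the flip is done once by code patching from sleepable context, and readers simply execute whichever branch the key currently selects.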