RuntimeError: DataLoader worker is killed by signal: Illegal instruction.

I am using the following code, which I run on a GPU machine with the configuration below:



nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2017 NVIDIA Corporation
Built on Fri_Sep__1_21:08:03_CDT_2017
Cuda compilation tools, release 9.0, V9.0.176


I am using PyTorch version 1.0.0.dev20181123.
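
For reference, the same build information can be printed from inside Python (a minimal sketch; the values in the comments are what I expect, not verified output):

import torch

# Print the PyTorch build details. An illegal instruction can come from a
# wheel compiled for CPU instructions this machine lacks, so the exact
# build matters.
print(torch.__version__)          # 1.0.0.dev20181123
print(torch.version.cuda)         # CUDA version the wheel was built against
print(torch.cuda.is_available())  # True if the GPU is visible to PyTorch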



I am getting this error: RuntimeError: DataLoader worker (pid 23646) is killed by signal: Illegal instruction.
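
Since an illegal instruction normally means that compiled code executed a CPU instruction the processor does not support (for example AVX on an older CPU), the CPU feature flags may be relevant. A quick Linux-only sketch to check them (my addition, not part of the original script):

# Read the first "flags" line from /proc/cpuinfo and report a few SIMD
# extensions that compiled extensions commonly rely on.
with open("/proc/cpuinfo") as f:
    flags = next(line for line in f if line.startswith("flags")).split()

for feature in ("sse4_2", "avx", "avx2"):
    print(feature, feature in flags)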



Thinking that it's a shared-memory problem, I tried the solution described at https://www.lucidarme.me/increase-shared-memory-limit/



It didn't help. Any pointers?
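
For what it's worth, a quick way to confirm that the shared-memory change actually took effect would be to check /dev/shm from Python (a sketch, separate from the training script below):

import shutil

# DataLoader workers hand batches back to the main process through shared
# memory in /dev/shm, which is what the linked fix is meant to enlarge.
total, used, free = shutil.disk_usage("/dev/shm")
print("/dev/shm: {:.1f} GiB total, {:.1f} GiB free".format(
    total / 2**30, free / 2**30))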



import argparse
import json
import os
import torch

#=====START: ADDED FOR DISTRIBUTED======
from distributed import init_distributed, apply_gradient_allreduce, reduce_tensor
from torch.utils.data.distributed import DistributedSampler
#=====END: ADDED FOR DISTRIBUTED======

from torch.utils.data import DataLoader
from glow import WaveGlow, WaveGlowLoss
from mel2samp import Mel2Samp


def load_checkpoint(checkpoint_path, model, optimizer):
    assert os.path.isfile(checkpoint_path)
    checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
    iteration = checkpoint_dict['iteration']
    optimizer.load_state_dict(checkpoint_dict['optimizer'])
    model_for_loading = checkpoint_dict['model']
    model.load_state_dict(model_for_loading.state_dict())
    print("Loaded checkpoint '{}' (iteration {})".format(
        checkpoint_path, iteration))
    return model, optimizer, iteration


def save_checkpoint(model, optimizer, learning_rate, iteration, filepath):
    print("Saving model and optimizer state at iteration {} to {}".format(
        iteration, filepath))
    model_for_saving = WaveGlow(**waveglow_config).cuda()
    model_for_saving.load_state_dict(model.state_dict())
    torch.save({'model': model_for_saving,
                'iteration': iteration,
                'optimizer': optimizer.state_dict(),
                'learning_rate': learning_rate}, filepath)


def train(num_gpus, rank, group_name, output_directory, epochs, learning_rate,
          sigma, iters_per_checkpoint, batch_size, seed, checkpoint_path):
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    #=====START: ADDED FOR DISTRIBUTED======
    if num_gpus > 1:
        init_distributed(rank, num_gpus, group_name, **dist_config)
    #=====END: ADDED FOR DISTRIBUTED======

    criterion = WaveGlowLoss(sigma)
    model = WaveGlow(**waveglow_config).cuda()

    #=====START: ADDED FOR DISTRIBUTED======
    if num_gpus > 1:
        model = apply_gradient_allreduce(model)
    #=====END: ADDED FOR DISTRIBUTED======

    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # Load checkpoint if one exists
    iteration = 0
    if checkpoint_path != "":
        model, optimizer, iteration = load_checkpoint(checkpoint_path, model,
                                                      optimizer)
        iteration += 1  # next iteration is iteration + 1

    trainset = Mel2Samp(**data_config)
    # =====START: ADDED FOR DISTRIBUTED======
    train_sampler = DistributedSampler(trainset) if num_gpus > 1 else None
    # =====END: ADDED FOR DISTRIBUTED======
    train_loader = DataLoader(trainset, num_workers=1, shuffle=False,
                              sampler=train_sampler,
                              batch_size=batch_size,
                              pin_memory=False,
                              drop_last=True)

    # Get shared output_directory ready
    if rank == 0:
        if not os.path.isdir(output_directory):
            os.makedirs(output_directory)
            os.chmod(output_directory, 0o775)
        print("output directory", output_directory)

    model.train()
    epoch_offset = max(0, int(iteration / len(train_loader)))
    # ================ MAIN TRAINING LOOP! ===================
    for epoch in range(epoch_offset, epochs):
        print("Epoch: {}".format(epoch))
        for i, batch in enumerate(train_loader):
            model.zero_grad()

            mel, audio = batch
            mel = torch.autograd.Variable(mel.cuda())
            audio = torch.autograd.Variable(audio.cuda())
            outputs = model((mel, audio))

            loss = criterion(outputs)
            if num_gpus > 1:
                reduced_loss = reduce_tensor(loss.data, num_gpus).item()
            else:
                reduced_loss = loss.item()
            loss.backward()
            optimizer.step()

            print("{}:\t{:.9f}".format(iteration, reduced_loss))

            if (iteration % iters_per_checkpoint == 0):
                if rank == 0:
                    checkpoint_path = "{}/waveglow_{}".format(
                        output_directory, iteration)
                    save_checkpoint(model, optimizer, learning_rate, iteration,
                                    checkpoint_path)

            iteration += 1


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', type=str,
                        help='JSON file for configuration')
    parser.add_argument('-r', '--rank', type=int, default=0,
                        help='rank of process for distributed')
    parser.add_argument('-g', '--group_name', type=str, default='',
                        help='name of group for distributed')
    args = parser.parse_args()

    # Parse configs. Globals nicer in this case
    with open(args.config) as f:
        data = f.read()
    config = json.loads(data)
    train_config = config["train_config"]
    global data_config
    data_config = config["data_config"]
    global dist_config
    dist_config = config["dist_config"]
    global waveglow_config
    waveglow_config = config["waveglow_config"]

    num_gpus = torch.cuda.device_count()
    if num_gpus > 1:
        if args.group_name == '':
            print("WARNING: Multiple GPUs detected but no distributed group set")
            print("Only running 1 GPU. Use distributed.py for multiple GPUs")
            num_gpus = 1

    if num_gpus == 1 and args.rank != 0:
        raise Exception("Doing single GPU training on rank > 0")

    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    train(num_gpus, args.rank, args.group_name, **train_config)
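
In case it helps to narrow things down, here is a minimal sketch that exercises the same dataset without any worker processes. With num_workers=0 the loading runs in the main process, so an illegal instruction in the data pipeline would crash with an ordinary traceback instead of the "worker killed by signal" error. The config path is a placeholder for the JSON file passed with -c above.

import json
from torch.utils.data import DataLoader
from mel2samp import Mel2Samp

# Placeholder: the same JSON config that train.py receives via -c.
with open("config.json") as f:
    data_config = json.load(f)["data_config"]

trainset = Mel2Samp(**data_config)
loader = DataLoader(trainset, num_workers=0, batch_size=1, shuffle=False)

# Pull a few batches in the main process; any fault in the data pipeline
# should now surface here directly.
for i, (mel, audio) in enumerate(loader):
    print(i, mel.shape, audio.shape)
    if i >= 4:
        break
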
Tags: pytorch pycuda torchvision